put_user(regs->rcx, rsp-11) )
{
DPRINTK("Error while creating failsafe callback frame.\n");
- domain_crash();
+ domain_crash(n->domain);
}
regs->entry_vector = TRAP_syscall;
* not enough information in just a gpte to figure out how to
* (re-)shadow this entry.
*/
- domain_crash();
+ domain_crash(d);
}
rc = shadow_do_update_va_mapping(va, val, v);
{
l1_pgentry_t ol1e, nl1e;
int modified = 0, i;
- struct vcpu *v;
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
if ( unlikely(!get_page_from_l1e(nl1e, d)) )
{
- MEM_LOG("ptwr: Could not re-validate l1 page");
/*
* Make the remaining p.t's consistent before crashing, so the
* reference counts are correct.
(L1_PAGETABLE_ENTRIES - i) * sizeof(l1_pgentry_t));
/* Crash the offending domain. */
- set_bit(_DOMF_ctrl_pause, &d->domain_flags);
- for_each_vcpu ( d, v )
- vcpu_sleep_nosync(v);
+ MEM_LOG("ptwr: Could not revalidate l1 page");
+ domain_crash(d);
break;
}
/* Toss the writable pagetable state and crash. */
unmap_domain_page(d->arch.ptwr[which].pl1e);
d->arch.ptwr[which].l1va = 0;
- domain_crash();
+ domain_crash(d);
return 0;
}
l4page = alloc_domheap_page(NULL);
if (l4page == NULL)
- domain_crash();
+ domain_crash(d);
l4 = map_domain_page(page_to_pfn(l4page));
memset(l4, 0, PAGE_SIZE);
l3page = alloc_domheap_page(NULL);
if (l3page == NULL)
- domain_crash();
+ domain_crash(d);
l3 = map_domain_page(page_to_pfn(l3page));
memset(l3, 0, PAGE_SIZE);
case MSR_FS_BASE:
if (!(VMX_LONG_GUEST(vc)))
/* XXX should it be GP fault */
- domain_crash();
+ domain_crash(vc->domain);
__vmread(GUEST_FS_BASE, &msr_content);
break;
case MSR_GS_BASE:
if (!(VMX_LONG_GUEST(vc)))
- domain_crash();
+ domain_crash(vc->domain);
__vmread(GUEST_GS_BASE, &msr_content);
break;
case MSR_SHADOW_GS_BASE:
case MSR_FS_BASE:
case MSR_GS_BASE:
if (!(VMX_LONG_GUEST(vc)))
- domain_crash();
+ domain_crash(vc->domain);
if (!IS_CANO_ADDRESS(msr_content)){
VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
vmx_inject_exception(vc, TRAP_gp_fault, 0);
case MSR_SHADOW_GS_BASE:
if (!(VMX_LONG_GUEST(vc)))
- domain_crash();
+ domain_crash(vc->domain);
vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
break;
mpfn = get_mfn_from_pfn(E820_MAP_PAGE >> PAGE_SHIFT);
if (mpfn == INVALID_MFN) {
printk("Can not find E820 memory map page for VMX domain.\n");
- domain_crash();
+ domain_crash(d);
}
p = map_domain_page(mpfn);
if (p == NULL) {
printk("Can not map E820 memory map page for VMX domain.\n");
- domain_crash();
+ domain_crash(d);
}
e820_map_nr = *(p + E820_MAP_NR_OFFSET);
printk("Can not get io request shared page"
" from E820 memory map for VMX domain.\n");
unmap_domain_page(p);
- domain_crash();
+ domain_crash(d);
}
unmap_domain_page(p);
mpfn = get_mfn_from_pfn(gpfn);
if (mpfn == INVALID_MFN) {
printk("Can not find io request shared page for VMX domain.\n");
- domain_crash();
+ domain_crash(d);
}
p = map_domain_page(mpfn);
if (p == NULL) {
printk("Can not map io request shared page for VMX domain.\n");
- domain_crash();
+ domain_crash(d);
}
d->arch.vmx_platform.shared_page_va = (unsigned long)p;
}
-void domain_crash(void)
+/*
+ * Mark the specified domain as crashed and initiate a crash shutdown.
+ * Always returns to the caller, even when the caller belongs to @d;
+ * callers that must not continue executing should use
+ * domain_crash_synchronous() instead.
+ */
+void domain_crash(struct domain *d)
{
- printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
- current->domain->domain_id, current->vcpu_id, smp_processor_id());
- show_registers(guest_cpu_user_regs());
- domain_shutdown(SHUTDOWN_crash);
+ if ( d == current->domain )
+ {
+ /* Self-crash: the current register state belongs to the
+ * crashing domain, so a register dump is meaningful. */
+ printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
+ d->domain_id, current->vcpu_id, smp_processor_id());
+ show_registers(guest_cpu_user_regs());
+ }
+ else
+ {
+ /* Crash reported on behalf of another domain: current
+ * registers are the reporter's, so do not dump them. */
+ printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
+ d->domain_id, current->domain->domain_id, smp_processor_id());
+ }
+
+ domain_shutdown(d, SHUTDOWN_crash);
}
+/*
+ * Crash the calling domain and do not return: after marking the crash,
+ * spin servicing softirqs until this vcpu is descheduled.
+ */
void domain_crash_synchronous(void)
{
- domain_crash();
+ domain_crash(current->domain);
+ /* Never resume the (now crashed) caller. */
for ( ; ; )
do_softirq();
}
__initcall(domain_shutdown_finaliser_init);
-void domain_shutdown(u8 reason)
+void domain_shutdown(struct domain *d, u8 reason)
{
- struct domain *d = current->domain;
- struct vcpu *v;
+ struct vcpu *v;
if ( d->domain_id == 0 )
{
{
TRACE_3D(TRC_SCHED_SHUTDOWN,
current->domain->domain_id, current->vcpu_id, arg);
- domain_shutdown((u8)arg);
+ domain_shutdown(current->domain, (u8)arg);
break;
}
do { \
printk("__vmx_bug at %s:%d\n", __FILE__, __LINE__); \
show_registers(regs); \
- domain_crash(); \
+ domain_crash(current->domain); \
} while (0)
#endif //__ASSEMBLY__
struct domain *find_domain_by_id(domid_t dom);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
-extern void domain_shutdown(u8 reason);
+extern void domain_shutdown(struct domain *d, u8 reason);
extern void domain_pause_for_debugger(void);
/*
- * Mark current domain as crashed. This function returns: the domain is not
- * synchronously descheduled from any processor.
+ * Mark specified domain as crashed. This function always returns, even if the
+ * caller is the specified domain. The domain is not synchronously descheduled
+ * from any processor.
*/
-extern void domain_crash(void);
+extern void domain_crash(struct domain *d);
/*
* Mark current domain as crashed and synchronously deschedule from the local